From 65cd10c6b43e4f4037fe089d704f4942a82192a5 Mon Sep 17 00:00:00 2001 From: "djm@kirby.fc.hp.com" Date: Wed, 12 Oct 2005 17:12:59 -0600 Subject: [PATCH] Initial SMP support Signed-off-by: Tristan Gingold --- xen/arch/ia64/linux-xen/head.S | 6 +- xen/arch/ia64/linux-xen/irq_ia64.c | 3 + xen/arch/ia64/linux-xen/mm_contig.c | 4 +- xen/arch/ia64/linux-xen/setup.c | 44 +++++++++----- xen/arch/ia64/linux-xen/smp.c | 27 ++++++--- xen/arch/ia64/linux-xen/smpboot.c | 16 +++++ xen/arch/ia64/xen/acpi.c | 21 +++++-- xen/arch/ia64/xen/domain.c | 60 +++++++++---------- xen/arch/ia64/xen/process.c | 12 ++++ xen/arch/ia64/xen/xenirq.c | 18 +++--- xen/arch/ia64/xen/xenmisc.c | 5 +- xen/arch/ia64/xen/xensetup.c | 31 ++++++---- xen/arch/ia64/xen/xentime.c | 6 +- xen/include/asm-ia64/config.h | 12 ++-- xen/include/asm-ia64/linux-xen/asm/spinlock.h | 9 +++ .../asm-ia64/linux-xen/linux/hardirq.h | 4 -- .../asm-ia64/linux-xen/linux/interrupt.h | 2 + 17 files changed, 184 insertions(+), 96 deletions(-) diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S index 9da143d979..14e919ddf0 100644 --- a/xen/arch/ia64/linux-xen/head.S +++ b/xen/arch/ia64/linux-xen/head.S @@ -324,6 +324,9 @@ start_ap: mov r16=-1 (isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it +#ifndef XEN + // XEN: stack is allocated in xenheap, which is currently always + // mapped. 
// load mapping for stack (virtaddr in r2, physaddr in r3) rsm psr.ic movl r17=PAGE_KERNEL @@ -353,7 +356,8 @@ start_ap: ssm psr.ic srlz.d ;; - +#endif + .load_current: // load the "current" pointer (r13) and ar.k6 with the current task #if defined(XEN) && defined(VALIDATE_VT) diff --git a/xen/arch/ia64/linux-xen/irq_ia64.c b/xen/arch/ia64/linux-xen/irq_ia64.c index 5436ef8694..c036ed08ee 100644 --- a/xen/arch/ia64/linux-xen/irq_ia64.c +++ b/xen/arch/ia64/linux-xen/irq_ia64.c @@ -281,5 +281,8 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3)); +#ifdef XEN + printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id); +#endif writeq(ipi_data, ipi_addr); } diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c index 6b9ee53aca..896a871c3b 100644 --- a/xen/arch/ia64/linux-xen/mm_contig.c +++ b/xen/arch/ia64/linux-xen/mm_contig.c @@ -193,8 +193,8 @@ per_cpu_init (void) */ if (smp_processor_id() == 0) { #ifdef XEN - cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SHIFT - - PAGE_SHIFT + get_order(NR_CPUS)); + cpu_data = alloc_xenheap_pages(get_order(NR_CPUS + * PERCPU_PAGE_SIZE)); #else cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); diff --git a/xen/arch/ia64/linux-xen/setup.c b/xen/arch/ia64/linux-xen/setup.c index ba7f91098d..7f351dd05c 100644 --- a/xen/arch/ia64/linux-xen/setup.c +++ b/xen/arch/ia64/linux-xen/setup.c @@ -366,6 +366,7 @@ check_for_logical_procs (void) } #endif +void __init #ifdef XEN early_setup_arch (char **cmdline_p) #else @@ -377,14 +378,12 @@ setup_arch (char **cmdline_p) ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); *cmdline_p = __va(ia64_boot_param->command_line); -#ifdef XEN - efi_init(); -#else +#ifndef XEN strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE); +#endif efi_init(); io_port_init(); 
-#endif #ifdef CONFIG_IA64_GENERIC { @@ -414,11 +413,17 @@ setup_arch (char **cmdline_p) #ifdef XEN early_cmdline_parse(cmdline_p); cmdline_parse(*cmdline_p); -#undef CONFIG_ACPI_BOOT #endif if (early_console_setup(*cmdline_p) == 0) mark_bsp_online(); +#ifdef XEN +} + +void __init +late_setup_arch (char **cmdline_p) +{ +#endif #ifdef CONFIG_ACPI_BOOT /* Initialize the ACPI boot-time table parser */ acpi_table_init(); @@ -433,20 +438,16 @@ setup_arch (char **cmdline_p) #ifndef XEN find_memory(); -#else - io_port_init(); -} - -void __init -late_setup_arch (char **cmdline_p) -{ -#undef CONFIG_ACPI_BOOT - acpi_table_init(); #endif + /* process SAL system table: */ ia64_sal_init(efi.sal_systab); #ifdef CONFIG_SMP +#ifdef XEN + init_smp_config (); +#endif + cpu_physical_id(0) = hard_smp_processor_id(); cpu_set(0, cpu_sibling_map[0]); @@ -768,6 +769,11 @@ cpu_init (void) cpu_data = per_cpu_init(); +#ifdef XEN + printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n", + current, current->domain->arch.mm); +#endif + /* * We set ar.k3 so that assembly code in MCA handler can compute * physical addresses of per cpu variables with a simple: @@ -887,6 +893,16 @@ cpu_init (void) #ifndef XEN pm_idle = default_idle; #endif + +#ifdef XEN + /* surrender usage of kernel registers to domain, use percpu area instead */ + __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE); + __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA); + __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK); + __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER); + __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT); + __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE); +#endif } void diff --git a/xen/arch/ia64/linux-xen/smp.c b/xen/arch/ia64/linux-xen/smp.c index 28c294a247..74f932137e 100644 --- a/xen/arch/ia64/linux-xen/smp.c +++ 
b/xen/arch/ia64/linux-xen/smp.c @@ -63,9 +63,18 @@ void flush_tlb_mask(cpumask_t mask) //Huh? This seems to be used on ia64 even if !CONFIG_SMP void smp_send_event_check_mask(cpumask_t mask) { - printf("smp_send_event_check_mask called\n"); - //dummy(); - //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR); + int cpu; + + /* Not for me. */ + cpu_clear(smp_processor_id(), mask); + if (cpus_empty(mask)) + return; + + printf("smp_send_event_check_mask called\n"); + + for (cpu = 0; cpu < NR_CPUS; ++cpu) + if (cpu_isset(cpu, mask)) + platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); } @@ -249,6 +258,7 @@ send_IPI_self (int op) send_IPI_single(smp_processor_id(), op); } +#ifndef XEN /* * Called with preeemption disabled. */ @@ -257,6 +267,7 @@ smp_send_reschedule (int cpu) { platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); } +#endif void smp_flush_tlb_all (void) @@ -395,15 +406,14 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai if (wait) atomic_set(&data.finished, 0); - printk("smp_call_function: about to spin_lock \n"); spin_lock(&call_lock); - printk("smp_call_function: done with spin_lock \n"); +#if 0 //def XEN + printk("smp_call_function: %d lock\n", smp_processor_id ()); +#endif call_data = &data; mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ - printk("smp_call_function: about to send_IPI \n"); send_IPI_allbutself(IPI_CALL_FUNC); - printk("smp_call_function: done with send_IPI \n"); /* Wait for response */ while (atomic_read(&data.started) != cpus) @@ -414,9 +424,10 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai cpu_relax(); call_data = NULL; - printk("smp_call_function: about to spin_unlock \n"); spin_unlock(&call_lock); +#if 0 //def XEN printk("smp_call_function: DONE WITH spin_unlock, returning \n"); +#endif return 0; } EXPORT_SYMBOL(smp_call_function); diff --git a/xen/arch/ia64/linux-xen/smpboot.c b/xen/arch/ia64/linux-xen/smpboot.c index 
1c0e84e31d..89f6829648 100644 --- a/xen/arch/ia64/linux-xen/smpboot.c +++ b/xen/arch/ia64/linux-xen/smpboot.c @@ -477,6 +477,22 @@ do_boot_cpu (int sapicid, int cpu) do_rest: task_for_booting_cpu = c_idle.idle; +#else + struct domain *idle; + struct vcpu *v; + void *stack; + + if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL ) + panic("failed 'createdomain' for CPU %d", cpu); + set_bit(_DOMF_idle_domain, &idle->domain_flags); + v = idle->vcpu[0]; + + printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v); + + task_for_booting_cpu = v; + + /* Set cpu number. */ + get_thread_info(v)->cpu = cpu; #endif Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); diff --git a/xen/arch/ia64/xen/acpi.c b/xen/arch/ia64/xen/acpi.c index 6dbc687b8b..f0eab76870 100644 --- a/xen/arch/ia64/xen/acpi.c +++ b/xen/arch/ia64/xen/acpi.c @@ -121,6 +121,7 @@ acpi_get_sysname (void) #ifdef CONFIG_ACPI_BOOT #define ACPI_MAX_PLATFORM_INTERRUPTS 256 +#define NR_IOSAPICS 4 #if 0 /* Array to record platform interrupt vectors for generic interrupt routing. */ @@ -162,7 +163,6 @@ static int available_cpus __initdata; struct acpi_table_madt * acpi_madt __initdata; static u8 has_8259; -#if 0 static int __init acpi_parse_lapic_addr_ovr ( acpi_table_entry_header *header, const unsigned long end) @@ -247,12 +247,13 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end) acpi_table_print_madt_entry(header); +#if 0 iosapic_init(iosapic->address, iosapic->global_irq_base); +#endif return 0; } - static int __init acpi_parse_plat_int_src ( acpi_table_entry_header *header, const unsigned long end) @@ -267,6 +268,7 @@ acpi_parse_plat_int_src ( acpi_table_print_madt_entry(header); +#if 0 /* * Get vector assignment for this interrupt, set attributes, * and program the IOSAPIC routing table. @@ -280,6 +282,7 @@ acpi_parse_plat_int_src ( (plintsrc->flags.trigger == 1) ? 
IOSAPIC_EDGE : IOSAPIC_LEVEL); platform_intr_list[plintsrc->type] = vector; +#endif return 0; } @@ -297,13 +300,14 @@ acpi_parse_int_src_ovr ( acpi_table_print_madt_entry(header); +#if 0 iosapic_override_isa_irq(p->bus_irq, p->global_irq, (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); +#endif return 0; } - static int __init acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end) { @@ -331,8 +335,10 @@ void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) */ sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT; +#if 0 /*Start cyclone clock*/ cyclone_setup(0); +#endif } } @@ -350,7 +356,9 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size) #else has_8259 = acpi_madt->flags.pcat_compat; #endif +#if 0 iosapic_system_init(has_8259); +#endif /* Get base address of IPI Message Block */ @@ -364,7 +372,6 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size) return 0; } -#endif #ifdef CONFIG_ACPI_NUMA @@ -529,6 +536,7 @@ acpi_register_gsi (u32 gsi, int polarity, int trigger) return acpi_register_irq(gsi, polarity, trigger); } EXPORT_SYMBOL(acpi_register_gsi); +#endif static int __init acpi_parse_fadt (unsigned long phys_addr, unsigned long size) { @@ -550,10 +558,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size) if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES) acpi_legacy_devices = 1; +#if 0 acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE); +#endif return 0; } -#endif unsigned long __init acpi_find_rsdp (void) @@ -567,7 +576,6 @@ acpi_find_rsdp (void) return rsdp_phys; } -#if 0 int __init acpi_boot_init (void) { @@ -646,6 +654,7 @@ acpi_boot_init (void) printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); return 0; } +#if 0 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq) { diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c index 4cd54a0b8c..7c9b0ddc11 
100644 --- a/xen/arch/ia64/xen/domain.c +++ b/xen/arch/ia64/xen/domain.c @@ -23,6 +23,7 @@ #include #include #include +#include //#include #include #include @@ -75,35 +76,21 @@ void free_perdomain_pt(struct domain *d) //free_page((unsigned long)d->mm.perdomain_pt); } -int hlt_counter; - -void disable_hlt(void) -{ - hlt_counter++; -} - -void enable_hlt(void) -{ - hlt_counter--; -} - static void default_idle(void) { - if ( hlt_counter == 0 ) - { + int cpu = smp_processor_id(); local_irq_disable(); - if ( !softirq_pending(smp_processor_id()) ) + if ( !softirq_pending(cpu)) safe_halt(); - //else - local_irq_enable(); - } + local_irq_enable(); } -void continue_cpu_idle_loop(void) +static void continue_cpu_idle_loop(void) { int cpu = smp_processor_id(); for ( ; ; ) { + printf ("idle%dD\n", cpu); #ifdef IA64 // __IRQ_STAT(cpu, idle_timestamp) = jiffies #else @@ -111,23 +98,32 @@ void continue_cpu_idle_loop(void) #endif while ( !softirq_pending(cpu) ) default_idle(); + add_preempt_count(SOFTIRQ_OFFSET); raise_softirq(SCHEDULE_SOFTIRQ); do_softirq(); + sub_preempt_count(SOFTIRQ_OFFSET); } } void startup_cpu_idle_loop(void) { + int cpu = smp_processor_id (); /* Just some sanity to ensure that the scheduler is set up okay. */ ASSERT(current->domain == IDLE_DOMAIN_ID); + printf ("idle%dA\n", cpu); raise_softirq(SCHEDULE_SOFTIRQ); +#if 0 /* All this work is done within continue_cpu_idle_loop */ + printf ("idle%dB\n", cpu); + asm volatile ("mov ar.k2=r0"); do_softirq(); + printf ("idle%dC\n", cpu); /* * Declares CPU setup done to the boot processor. * Therefore memory barrier to ensure state is visible. */ smp_mb(); +#endif #if 0 //do we have to ensure the idle task has a shared page so that, for example, //region registers can be loaded from it. Apparently not... 
@@ -229,17 +225,21 @@ void arch_do_createdomain(struct vcpu *v) v->arch.breakimm = d->arch.breakimm; d->arch.sys_pgnr = 0; - d->arch.mm = xmalloc(struct mm_struct); - if (unlikely(!d->arch.mm)) { - printk("Can't allocate mm_struct for domain %d\n",d->domain_id); - return -ENOMEM; - } - memset(d->arch.mm, 0, sizeof(*d->arch.mm)); - d->arch.mm->pgd = pgd_alloc(d->arch.mm); - if (unlikely(!d->arch.mm->pgd)) { - printk("Can't allocate pgd for domain %d\n",d->domain_id); - return -ENOMEM; - } + if (d->domain_id != IDLE_DOMAIN_ID) { + d->arch.mm = xmalloc(struct mm_struct); + if (unlikely(!d->arch.mm)) { + printk("Can't allocate mm_struct for domain %d\n",d->domain_id); + return -ENOMEM; + } + memset(d->arch.mm, 0, sizeof(*d->arch.mm)); + d->arch.mm->pgd = pgd_alloc(d->arch.mm); + if (unlikely(!d->arch.mm->pgd)) { + printk("Can't allocate pgd for domain %d\n",d->domain_id); + return -ENOMEM; + } + } else + d->arch.mm = NULL; + printf ("arch_do_create_domain: domain=%p\n", d); } void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c) diff --git a/xen/arch/ia64/xen/process.c b/xen/arch/ia64/xen/process.c index b49a94d8a8..1432002032 100644 --- a/xen/arch/ia64/xen/process.c +++ b/xen/arch/ia64/xen/process.c @@ -62,11 +62,23 @@ long do_iopl(domid_t domain, unsigned int new_io_pl) return 0; } +#include + +extern struct schedule_data schedule_data[NR_CPUS]; + void schedule_tail(struct vcpu *next) { unsigned long rr7; //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info); //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info); + + // TG: Real HACK FIXME. + // This is currently necessary because when a new domain is started, + // the context_switch function of xen/common/schedule.c(__enter_scheduler) + // never returns. Therefore, the lock must be released. + // schedule_tail is only called when a domain is started. 
+ spin_unlock_irq(&schedule_data[current->processor].schedule_lock); + /* rr7 will be postponed to last point when resuming back to guest */ if(VMX_DOMAIN(current)){ vmx_load_all_rr(current); diff --git a/xen/arch/ia64/xen/xenirq.c b/xen/arch/ia64/xen/xenirq.c index bf4778e01e..e66b8ac057 100644 --- a/xen/arch/ia64/xen/xenirq.c +++ b/xen/arch/ia64/xen/xenirq.c @@ -35,7 +35,7 @@ xen_debug_irq(ia64_vector vector, struct pt_regs *regs) int xen_do_IRQ(ia64_vector vector) { - if (vector != 0xef) { + if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) { extern void vcpu_pend_interrupt(void *, int); #if 0 if (firsttime[vector]) { @@ -57,22 +57,18 @@ xen_do_IRQ(ia64_vector vector) return(0); } -/* From linux/kernel/softirq.c */ -#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED -# define invoke_softirq() __do_softirq() -#else -# define invoke_softirq() do_softirq() -#endif - /* * Exit an interrupt context. Process softirqs if needed and possible: */ void irq_exit(void) { //account_system_vtime(current); - //sub_preempt_count(IRQ_EXIT_OFFSET); - if (!in_interrupt() && local_softirq_pending()) - invoke_softirq(); + sub_preempt_count(IRQ_EXIT_OFFSET); + if (!in_interrupt() && local_softirq_pending()) { + add_preempt_count(SOFTIRQ_OFFSET); + do_softirq(); + sub_preempt_count(SOFTIRQ_OFFSET); + } //preempt_enable_no_resched(); } /* end from linux/kernel/softirq.c */ diff --git a/xen/arch/ia64/xen/xenmisc.c b/xen/arch/ia64/xen/xenmisc.c index e25a8fa1f0..e62115ebbd 100644 --- a/xen/arch/ia64/xen/xenmisc.c +++ b/xen/arch/ia64/xen/xenmisc.c @@ -280,6 +280,8 @@ void cs01foo(void) {} unsigned long context_switch_count = 0; +#include + void context_switch(struct vcpu *prev, struct vcpu *next) { //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); @@ -287,7 +289,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next) //prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff); //if (prev->domain->domain_id == 1 && next->domain->domain_id 
== 0) cs10foo(); //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo(); -//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id); +printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (), + prev->domain->domain_id,next->domain->domain_id); if(VMX_DOMAIN(prev)){ vtm_domain_out(prev); } diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c index 8b56f9817e..49d11862a5 100644 --- a/xen/arch/ia64/xen/xensetup.c +++ b/xen/arch/ia64/xen/xensetup.c @@ -253,11 +253,11 @@ void start_kernel(void) printk("About to call scheduler_init()\n"); scheduler_init(); local_irq_disable(); + init_IRQ (); printk("About to call init_xen_time()\n"); init_xen_time(); /* initialise the time */ printk("About to call ac_timer_init()\n"); ac_timer_init(); -// init_xen_time(); ??? #ifdef CONFIG_SMP if ( opt_nosmp ) @@ -276,6 +276,9 @@ printk("About to call ac_timer_init()\n"); //BUG_ON(!local_irq_is_enabled()); + /* Enable IRQ to receive IPI (needed for ITC sync). */ + local_irq_enable(); + printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus); for_each_present_cpu ( i ) { @@ -287,24 +290,16 @@ printk("About to call __cpu_up(%d)\n",i); } } + local_irq_disable(); + printk("Brought up %ld CPUs\n", (long)num_online_cpus()); smp_cpus_done(max_cpus); #endif - - // FIXME: Should the following be swapped and moved later? 
- schedulers_start(); do_initcalls(); printk("About to call sort_main_extable()\n"); sort_main_extable(); - /* surrender usage of kernel registers to domain, use percpu area instead */ - __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE); - __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA); - __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK); - __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER); - __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT); - __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE); /* Create initial domain 0. */ printk("About to call do_createdomain()\n"); @@ -342,6 +337,11 @@ printk("About to call construct_dom0()\n"); 0, 0) != 0) panic("Could not set up DOM0 guest OS\n"); + + /* PIN domain0 on CPU 0. */ + dom0->vcpu[0]->cpumap=1; + set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags); + #ifdef CLONE_DOMAIN0 { int i; @@ -379,9 +379,16 @@ printk("About to call init_trace_bufs()\n"); domain_unpause_by_systemcontroller(clones[i]); } #endif - domain_unpause_by_systemcontroller(dom0); domain0_ready = 1; + local_irq_enable(); + + printf("About to call schedulers_start dom0=%p, idle0_dom=%p\n", + dom0, &idle0_domain); + schedulers_start(); + + domain_unpause_by_systemcontroller(dom0); + printk("About to call startup_cpu_idle_loop()\n"); startup_cpu_idle_loop(); } diff --git a/xen/arch/ia64/xen/xentime.c b/xen/arch/ia64/xen/xentime.c index 30f3c111ce..75a454b0ed 100644 --- a/xen/arch/ia64/xen/xentime.c +++ b/xen/arch/ia64/xen/xentime.c @@ -103,10 +103,10 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) #ifdef HEARTBEAT_FREQ static long count = 0; if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) { - printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n", - regs->cr_iip, + printf("Heartbeat... 
iip=%p\n", /*",psr.i=%d,pend=%d\n", */ + regs->cr_iip /*, VCPU(current,interrupt_delivery_enabled), - VCPU(current,pending_interruption)); + VCPU(current,pending_interruption) */); count = 0; } #endif diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h index 1d3ba8dabd..10a9b38739 100644 --- a/xen/include/asm-ia64/config.h +++ b/xen/include/asm-ia64/config.h @@ -28,8 +28,8 @@ #ifdef CONFIG_XEN_SMP #define CONFIG_SMP 1 -#define NR_CPUS 2 -#define CONFIG_NR_CPUS 2 +#define NR_CPUS 8 +#define CONFIG_NR_CPUS 8 #else #undef CONFIG_SMP #define NR_CPUS 1 @@ -123,8 +123,7 @@ extern char _end[]; /* standard ELF symbol */ #ifdef CONFIG_SMP #warning "Lots of things to fix to enable CONFIG_SMP!" #endif -// FIXME SMP -#define get_cpu() 0 +#define get_cpu() smp_processor_id() #define put_cpu() do {} while(0) // needed for common/dom0_ops.c until hyperthreading is supported @@ -140,6 +139,7 @@ struct page; // function calls; see decl in xen/include/xen/sched.h #undef free_task_struct #undef alloc_task_struct +#define get_thread_info(v) alloc_thread_info(v) // initial task has a different name in Xen //#define idle0_task init_task @@ -299,7 +299,11 @@ extern int ht_per_core; #endif /* __XEN_IA64_CONFIG_H__ */ // needed for include/xen/smp.h +#ifdef CONFIG_SMP +#define __smp_processor_id() current_thread_info()->cpu +#else #define __smp_processor_id() 0 +#endif // FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7 diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h index c2d32e7a48..1c2034c80d 100644 --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h @@ -17,11 +17,16 @@ #include #include +#define DEBUG_SPINLOCK + typedef struct { volatile unsigned int lock; #ifdef CONFIG_PREEMPT unsigned int break_lock; #endif +#ifdef DEBUG_SPINLOCK + void *locker; +#endif #ifdef XEN unsigned char recurse_cpu; unsigned char recurse_cnt; @@ -96,6 +101,10 @@ 
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); # endif /* CONFIG_MCKINLEY */ #endif + +#ifdef DEBUG_SPINLOCK + asm volatile ("mov %0=ip" : "=r" (lock->locker)); +#endif } #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ diff --git a/xen/include/asm-ia64/linux-xen/linux/hardirq.h b/xen/include/asm-ia64/linux-xen/linux/hardirq.h index 2431491726..b076644e24 100644 --- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h +++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h @@ -67,11 +67,7 @@ */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) -#ifdef XEN -#define in_interrupt() 0 // FIXME SMP LATER -#else #define in_interrupt() (irq_count()) -#endif #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) diff --git a/xen/include/asm-ia64/linux-xen/linux/interrupt.h b/xen/include/asm-ia64/linux-xen/linux/interrupt.h index caea47d641..b88e9457f1 100644 --- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h +++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h @@ -88,6 +88,7 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ +#ifndef XEN /* SoftIRQ primitives. */ #define local_bh_disable() \ do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) @@ -95,6 +96,7 @@ static inline void __deprecated save_and_cli(unsigned long *x) do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) extern void local_bh_enable(void); +#endif /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes -- 2.30.2